# Chunk defaults for the rendered document.
# comment = "" suppresses the prefix on echoed console output; the previous
# value (comment = FALSE) is not a valid comment string and was printed
# literally, which is why output lines below appear prefixed with "FALSE".
knitr::opts_chunk$set(echo = TRUE, warning = FALSE, comment = "", message = FALSE,
                      cache = FALSE)

library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.2     ✔ readr     2.1.4
## ✔ forcats   1.0.0     ✔ stringr   1.5.0
## ✔ ggplot2   3.4.2     ✔ tibble    3.2.1
## ✔ lubridate 1.9.2     ✔ tidyr     1.3.0
## ✔ purrr     1.0.1     
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(xgboost)
## 
## Attaching package: 'xgboost'
## 
## The following object is masked from 'package:dplyr':
## 
##     slice
library(Metrics)
library(ggpmisc)
## Loading required package: ggpp
## 
## Attaching package: 'ggpp'
## 
## The following object is masked from 'package:ggplot2':
## 
##     annotate
library(ggthemes)

# Input/output locations
match_dir <- 'data/matchups/'
model_dir <- 'data/models/'

# Fix the RNG so the train/test/validation splits are reproducible
set.seed(799)

Purpose

The purpose of this script is to apply the xgboost algorithm to Remote Sensing Imagery of Lake Yojoa in Honduras, to estimate Yojoa water clarity. You can read more about this lake here.

In this ‘more paranoid’ approach, we use a train-test-validate method, where the validation is holdout data from the train-test to confirm model performance.

Load matchup data

# Filenames of every candidate matchup file in the matchup directory
match <- list.files(match_dir)

# Prepare a matchup dataframe for modeling:
#  - add a `rowid` column (used later to build non-overlapping train/test/val splits)
#  - coerce secchi to numeric (one malformed value with two decimal points becomes NA)
#  - drop rows without a usable secchi observation
# Returns the cleaned dataframe directly (the original assigned to an unused
# local and relied on the assignment's invisible return value).
prepData <- function(df) {
  df %>%
    rowid_to_column() %>%
    mutate(secchi = as.numeric(secchi)) %>%
    filter(!is.na(secchi))
}


# Load the matchup files; the filename pattern selects the matchup-window
# variant (the 'us' exclusion filters out other similarly-named files)
three_file <- match[grepl('three', match) & !grepl('us', match)]
threeDay <- prepData(read.csv(file.path(match_dir, three_file)))

five_file <- match[grepl('five', match) & !grepl('us', match)]
fiveDay <- prepData(read.csv(file.path(match_dir, five_file)))

jemma_file <- match[grepl('jemma', match)]
jemma <- prepData(read.csv(file.path(match_dir, jemma_file)))

We want to predict the secchi value in these datasets, so let’s set the target as that variable:

## The response variable every model predicts is secchi depth
target <- "secchi"

Xgboost runs

Make test, training, validation sets

For each dataset, let’s grab 60% of the data as the ‘train’ set, and split the remainder between ‘test’ and ‘val’.

Three Day Window

## Training split: a random 60% of the three-day matchups
train_3 <- threeDay %>%
  sample_frac(0.6)

## Test split: half of the remaining 40%
test_3 <- threeDay %>%
  filter(!rowid %in% train_3$rowid) %>%
  sample_frac(0.5)

## Validation split: every row in neither train nor test
val_3 <- threeDay %>%
  filter(!(rowid %in% test_3$rowid | rowid %in% train_3$rowid))

Five Day Window

## Training split: a random 60% of the five-day matchups
train_5 <- fiveDay %>%
  sample_frac(0.6)

## Test split: half of the remaining 40%
test_5 <- fiveDay %>%
  filter(!rowid %in% train_5$rowid) %>%
  sample_frac(0.5)

## Validation split: every row in neither train nor test
val_5 <- fiveDay %>%
  filter(!(rowid %in% test_5$rowid | rowid %in% train_5$rowid))

Local Knowledge Window

The ‘local knowledge’ window creates a variable matchup window, where matchups can be within 7 days in all times of year except October and November, when the lake can have dramatic shifts in clarity in a short period of time.

## Training split: a random 60% of the local-knowledge-window matchups
train_j <- jemma %>%
  sample_frac(0.6)

## Test split: half of the remaining 40%
test_j <- jemma %>%
  filter(!rowid %in% train_j$rowid) %>%
  sample_frac(0.5)

## Validation split: every row in neither train nor test
val_j <- jemma %>%
  filter(!(rowid %in% test_j$rowid | rowid %in% train_j$rowid))

Name feature groups

Here, we indicate the features to be used in our models. We’ll use the visual bands and add in summaries of ERA5 met data. In our datasets, the 5-day met summaries have the suffix ‘_5’, etc. The first two models only include recent weather summaries, but the final two models include 5 or 7 day weather summaries as well as the previous day’s weather.

# Optical features shared by every model: corrected band medians plus band ratios
band_feats <- c('med_Blue_corr', 'med_Green_corr', 'med_Red_corr', 'med_Nir_corr',
                'RN', 'BG', 'RB', 'GB')

# ERA5 met summary columns for an n-day window (columns carry the suffix '_n')
met_feats <- function(n) {
  paste0(c('tot_sol_rad_KJpm2_', 'max_temp_degK_', 'mean_temp_degK_',
           'min_temp_degK_', 'tot_precip_m_', 'mean_wind_mps_'), n)
}

# Previous-day met observations
prev_day_feats <- c('solar_rad_KJpm2_prev', 'precip_m_prev',
                    'air_temp_degK_prev', 'wind_speed_mps_prev')

# Feature sets: bands + 3- or 5-day met summaries; the last two add the
# previous day's weather to the 5- or 7-day summaries
band_met3_feats  <- c(band_feats, met_feats(3))
band_met5_feats  <- c(band_feats, met_feats(5))
band_met51_feats <- c(band_feats, met_feats(5), prev_day_feats)
band_met71_feats <- c(band_feats, met_feats(7), prev_day_feats)

Format data for xgboost

3 Day Window

## Build an xgb.DMatrix from the chosen feature columns, labeled with the
## target (secchi). Replaces 12 copy-pasted xgb.DMatrix calls per window.
as_dmatrix <- function(df, feats) {
  xgb.DMatrix(data = as.matrix(df[, feats]), label = df[, target])
}

## 3 day window, 3 days previous met
dtrain_3d_3m <- as_dmatrix(train_3, band_met3_feats)
dtest_3d_3m  <- as_dmatrix(test_3, band_met3_feats)
dval_3d_3m   <- as_dmatrix(val_3, band_met3_feats)

## 3 day window, 5 days previous met
dtrain_3d_5m <- as_dmatrix(train_3, band_met5_feats)
dtest_3d_5m  <- as_dmatrix(test_3, band_met5_feats)
dval_3d_5m   <- as_dmatrix(val_3, band_met5_feats)

## 3 day window, 5/1 days previous met
dtrain_3d_51m <- as_dmatrix(train_3, band_met51_feats)
dtest_3d_51m  <- as_dmatrix(test_3, band_met51_feats)
dval_3d_51m   <- as_dmatrix(val_3, band_met51_feats)

## 3 day window, 7/1 days previous met
dtrain_3d_71m <- as_dmatrix(train_3, band_met71_feats)
dtest_3d_71m  <- as_dmatrix(test_3, band_met71_feats)
dval_3d_71m   <- as_dmatrix(val_3, band_met71_feats)

5 Day Window

## Build an xgb.DMatrix from the chosen feature columns, labeled with the
## target (secchi). Replaces 12 copy-pasted xgb.DMatrix calls per window.
as_dmatrix <- function(df, feats) {
  xgb.DMatrix(data = as.matrix(df[, feats]), label = df[, target])
}

## 5 day window, 3 days previous met
dtrain_5d_3m <- as_dmatrix(train_5, band_met3_feats)
dtest_5d_3m  <- as_dmatrix(test_5, band_met3_feats)
dval_5d_3m   <- as_dmatrix(val_5, band_met3_feats)

## 5 day window, 5 days previous met
dtrain_5d_5m <- as_dmatrix(train_5, band_met5_feats)
dtest_5d_5m  <- as_dmatrix(test_5, band_met5_feats)
dval_5d_5m   <- as_dmatrix(val_5, band_met5_feats)

## 5 day window, 5/1 days previous met
dtrain_5d_51m <- as_dmatrix(train_5, band_met51_feats)
dtest_5d_51m  <- as_dmatrix(test_5, band_met51_feats)
dval_5d_51m   <- as_dmatrix(val_5, band_met51_feats)

## 5 day window, 7/1 days previous met
dtrain_5d_71m <- as_dmatrix(train_5, band_met71_feats)
dtest_5d_71m  <- as_dmatrix(test_5, band_met71_feats)
dval_5d_71m   <- as_dmatrix(val_5, band_met71_feats)

Local Knowledge Window

## Build an xgb.DMatrix from the chosen feature columns, labeled with the
## target (secchi). Replaces 12 copy-pasted xgb.DMatrix calls per window.
as_dmatrix <- function(df, feats) {
  xgb.DMatrix(data = as.matrix(df[, feats]), label = df[, target])
}

## jemma window, 5 days previous met
dtrain_jd_5m <- as_dmatrix(train_j, band_met5_feats)
dtest_jd_5m  <- as_dmatrix(test_j, band_met5_feats)
dval_jd_5m   <- as_dmatrix(val_j, band_met5_feats)

## jemma window, 3 days previous met
dtrain_jd_3m <- as_dmatrix(train_j, band_met3_feats)
dtest_jd_3m  <- as_dmatrix(test_j, band_met3_feats)
dval_jd_3m   <- as_dmatrix(val_j, band_met3_feats)

## jemma window, 5/1 days previous met
dtrain_jd_51m <- as_dmatrix(train_j, band_met51_feats)
dtest_jd_51m  <- as_dmatrix(test_j, band_met51_feats)
dval_jd_51m   <- as_dmatrix(val_j, band_met51_feats)

## jemma window, 7/1 days previous met
dtrain_jd_71m <- as_dmatrix(train_j, band_met71_feats)
dtest_jd_71m  <- as_dmatrix(test_j, band_met71_feats)
dval_jd_71m   <- as_dmatrix(val_j, band_met71_feats)

Parameter optimization

This is an xgboost optimization method developed by Sam Sillen where you list many possible hyperparameter options and then create a matrix of all possible combinations - aka ‘grid search’ - and grab the top 20 performing combinations of hyperparameters by squared error (our loss statistic).

# Hyperparameter grid: every combination below is evaluated
# (3 * 3 * 3 * 2 * 3 = 162 model fits per dataset/feature-set pairing)
grid_train <- expand.grid(
  max_depth        = c(3, 6, 8),
  subsample        = c(0.5, 0.8, 1),
  colsample_bytree = c(0.5, 0.8, 1),
  eta              = c(0.1, 0.3),
  min_child_weight = c(3, 5, 7)
)

# Fit one xgboost booster for a single row of the hyperparameter grid and
# return that grid row augmented with its validation loss, the best-iteration
# message, and the fitted booster itself (stored as a list-column).
hypertune_xgboost <- function(train, test, grid) {

  booster_params <- list(
    booster = "gbtree",
    objective = 'reg:squarederror',
    eta = grid$eta,
    max_depth = grid$max_depth,
    min_child_weight = grid$min_child_weight,
    subsample = grid$subsample,
    colsample_bytree = grid$colsample_bytree
  )

  # Early stopping monitors the 'val' entry of the watchlist
  fit <- xgb.train(
    params = booster_params,
    data = train,
    nrounds = 1000,
    watchlist = list(train = train, val = test),
    verbose = 0,
    early_stopping_rounds = 20
  )

  grid %>%
    mutate(
      val_loss = fit$best_score,
      best_message = fit$best_msg,
      mod = list(fit)
    )
}

3 day window, 3 day met hypertuning

Note: evaluation is turned off for these chunks so as not to overwrite previous models and parameter tuning in the next section.

## Grid search: one xgboost fit per hyperparameter combination (3 day window, 3 day met)
xgboost_hypertune_3d_3m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_3d_3m, dtest_3d_3m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_3d_3m <- xgboost_hypertune_3d_3m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_3d_3m <- xgboost_hypertune_3d_3m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_3d_3m, best_mod_3d_3m, file = 'data/models/paramsxg_mp_val_3d_3m.RData')

3 day window, 5 day met hypertuning

## Grid search: one xgboost fit per hyperparameter combination (3 day window, 5 day met)
xgboost_hypertune_3d_5m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_3d_5m, dtest_3d_5m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_3d_5m <- xgboost_hypertune_3d_5m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_3d_5m <- xgboost_hypertune_3d_5m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_3d_5m, best_mod_3d_5m, file = 'data/models/paramsxg_mp_val_3d_5m.RData')

3 day window, 5/1 day met hypertuning

## Grid search: one xgboost fit per hyperparameter combination (3 day window, 5/1 day met)
## (the previous header comment said "5 day window" — this chunk uses the 3 day data)
xgboost_hypertune_3d_51m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_3d_51m, dtest_3d_51m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_3d_51m <- xgboost_hypertune_3d_51m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_3d_51m <- xgboost_hypertune_3d_51m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_3d_51m, best_mod_3d_51m, file = 'data/models/paramsxg_mp_val_3d_51m.RData')

3 day window, 7/1 day met hypertuning

## Grid search: one xgboost fit per hyperparameter combination (3 day window, 7/1 day met)
## (the previous header comment said "5 day window" — this chunk uses the 3 day data)
xgboost_hypertune_3d_71m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_3d_71m, dtest_3d_71m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_3d_71m <- xgboost_hypertune_3d_71m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_3d_71m <- xgboost_hypertune_3d_71m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_3d_71m, best_mod_3d_71m, file = 'data/models/paramsxg_mp_val_3d_71m.RData')

5 day window, 3 day met hypertuning

## Grid search: one xgboost fit per hyperparameter combination (5 day window, 3 day met)
xgboost_hypertune_5d_3m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_5d_3m, dtest_5d_3m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_5d_3m <- xgboost_hypertune_5d_3m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_5d_3m <- xgboost_hypertune_5d_3m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_5d_3m, best_mod_5d_3m, file = 'data/models/paramsxg_mp_val_5d_3m.RData')

5 day window, 5 day met hypertuning

## Grid search: one xgboost fit per hyperparameter combination (5 day window, 5 day met)
xgboost_hypertune_5d_5m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_5d_5m, dtest_5d_5m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_5d_5m <- xgboost_hypertune_5d_5m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_5d_5m <- xgboost_hypertune_5d_5m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_5d_5m, best_mod_5d_5m, file = 'data/models/paramsxg_mp_val_5d_5m.RData')

5 day window, 5/1 day met hypertuning

## Grid search: one xgboost fit per hyperparameter combination (5 day window, 5/1 day met)
xgboost_hypertune_5d_51m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_5d_51m, dtest_5d_51m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_5d_51m <- xgboost_hypertune_5d_51m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_5d_51m <- xgboost_hypertune_5d_51m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_5d_51m, best_mod_5d_51m, file = 'data/models/paramsxg_mp_val_5d_51m.RData')

5 day window, 7/1 day met hypertuning

## Grid search: one xgboost fit per hyperparameter combination (5 day window, 7/1 day met)
xgboost_hypertune_5d_71m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_5d_71m, dtest_5d_71m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_5d_71m <- xgboost_hypertune_5d_71m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_5d_71m <- xgboost_hypertune_5d_71m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_5d_71m, best_mod_5d_71m, file = 'data/models/paramsxg_mp_val_5d_71m.RData')

Jemma-special, 3 day met

## Grid search: one xgboost fit per hyperparameter combination (jemma window, 3 day met)
xgboost_hypertune_jd_3m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_jd_3m, dtest_jd_3m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_jd_3m <- xgboost_hypertune_jd_3m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_jd_3m <- xgboost_hypertune_jd_3m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_jd_3m, best_mod_jd_3m, file = 'data/models/paramsxg_mp_val_jd_3m.RData')

Jemma-special, 5 day met

## Grid search: one xgboost fit per hyperparameter combination (jemma window, 5 day met)
xgboost_hypertune_jd_5m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_jd_5m, dtest_jd_5m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_jd_5m <- xgboost_hypertune_jd_5m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_jd_5m <- xgboost_hypertune_jd_5m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_jd_5m, best_mod_jd_5m, file = 'data/models/paramsxg_mp_val_jd_5m.RData')

Jemma-special, 5/1 day met

## Grid search: one xgboost fit per hyperparameter combination (jemma window, 5/1 day met)
xgboost_hypertune_jd_51m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_jd_51m, dtest_jd_51m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_jd_51m <- xgboost_hypertune_jd_51m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_jd_51m <- xgboost_hypertune_jd_51m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_jd_51m, best_mod_jd_51m, file = 'data/models/paramsxg_mp_val_jd_51m.RData')

Jemma-special, 7/1 day met

## Grid search: one xgboost fit per hyperparameter combination (jemma window, 7/1 day met)
xgboost_hypertune_jd_71m <- grid_train %>%
  pmap_dfr(function(...) {
    hypertune_xgboost(dtrain_jd_71m, dtest_jd_71m, tibble(...))
  })

# Top 20 combinations by validation loss
mod_summary_jd_71m <- xgboost_hypertune_jd_71m %>%
  arrange(val_loss) %>%
  slice_head(n = 20)

# The single lowest-loss combination
best_mod_jd_71m <- xgboost_hypertune_jd_71m %>%
  filter(val_loss == min(val_loss))

save(mod_summary_jd_71m, best_mod_jd_71m, file = 'data/models/paramsxg_mp_val_jd_71m.RData')

Model Assessment and Application

load model summaries

# Reload the saved hyperparameter summaries and best models for every
# window/met-summary combination produced above
model_files <- c(
  'data/models/paramsxg_mp_val_3d_3m.RData',
  'data/models/paramsxg_mp_val_3d_5m.RData',
  'data/models/paramsxg_mp_val_3d_71m.RData',
  'data/models/paramsxg_mp_val_3d_51m.RData',
  'data/models/paramsxg_mp_val_5d_3m.RData',
  'data/models/paramsxg_mp_val_5d_5m.RData',
  'data/models/paramsxg_mp_val_5d_51m.RData',
  'data/models/paramsxg_mp_val_5d_71m.RData',
  'data/models/paramsxg_mp_val_jd_3m.RData',
  'data/models/paramsxg_mp_val_jd_5m.RData',
  'data/models/paramsxg_mp_val_jd_51m.RData',
  'data/models/paramsxg_mp_val_jd_71m.RData'
)
for (f in model_files) load(f)

Now that these are loaded, we need to look at the test/train statistics. Ideally the train/test RMSE are relatively close so we don’t choose too overfit of a model. Below, we apply the model to the validation dataset and plot the validation observed versus predicted.

Three day window dataset

# Train/val RMSE messages for the top 20 models (3 day window, 3 day met)
select(mod_summary_3d_3m, best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                   
FALSE    <chr>                                          
FALSE  1 "[103]\ttrain-rmse:0.113883\tval-rmse:0.624023"
FALSE  2 "[114]\ttrain-rmse:0.075017\tval-rmse:0.629418"
FALSE  3 "[41]\ttrain-rmse:0.507325\tval-rmse:0.629930" 
FALSE  4 "[11]\ttrain-rmse:0.357180\tval-rmse:0.645146" 
FALSE  5 "[48]\ttrain-rmse:0.465745\tval-rmse:0.646048" 
FALSE  6 "[86]\ttrain-rmse:0.154512\tval-rmse:0.647932" 
FALSE  7 "[13]\ttrain-rmse:0.361831\tval-rmse:0.652924" 
FALSE  8 "[13]\ttrain-rmse:0.278694\tval-rmse:0.662542" 
FALSE  9 "[95]\ttrain-rmse:0.265339\tval-rmse:0.668107" 
FALSE 10 "[21]\ttrain-rmse:0.459728\tval-rmse:0.673707" 
FALSE 11 "[28]\ttrain-rmse:0.173449\tval-rmse:0.674939" 
FALSE 12 "[49]\ttrain-rmse:0.247211\tval-rmse:0.677989" 
FALSE 13 "[80]\ttrain-rmse:0.288392\tval-rmse:0.684942" 
FALSE 14 "[20]\ttrain-rmse:0.528480\tval-rmse:0.686146" 
FALSE 15 "[14]\ttrain-rmse:0.653506\tval-rmse:0.686244" 
FALSE 16 "[102]\ttrain-rmse:0.416878\tval-rmse:0.686613"
FALSE 17 "[173]\ttrain-rmse:0.054351\tval-rmse:0.689267"
FALSE 18 "[47]\ttrain-rmse:0.608462\tval-rmse:0.691494" 
FALSE 19 "[59]\ttrain-rmse:0.355369\tval-rmse:0.694319" 
FALSE 20 "[70]\ttrain-rmse:0.246263\tval-rmse:0.694387"
# Train/val RMSE messages for the top 20 models (3 day window, 5 day met)
select(mod_summary_3d_5m, best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                  
FALSE    <chr>                                         
FALSE  1 "[45]\ttrain-rmse:0.251663\tval-rmse:0.643949"
FALSE  2 "[12]\ttrain-rmse:0.516066\tval-rmse:0.663720"
FALSE  3 "[74]\ttrain-rmse:0.194844\tval-rmse:0.664838"
FALSE  4 "[15]\ttrain-rmse:0.251613\tval-rmse:0.667447"
FALSE  5 "[14]\ttrain-rmse:0.702113\tval-rmse:0.673156"
FALSE  6 "[40]\ttrain-rmse:0.473091\tval-rmse:0.676498"
FALSE  7 "[13]\ttrain-rmse:0.701906\tval-rmse:0.677969"
FALSE  8 "[44]\ttrain-rmse:0.453686\tval-rmse:0.681272"
FALSE  9 "[81]\ttrain-rmse:0.360825\tval-rmse:0.684850"
FALSE 10 "[40]\ttrain-rmse:0.329602\tval-rmse:0.685379"
FALSE 11 "[26]\ttrain-rmse:0.394861\tval-rmse:0.686575"
FALSE 12 "[40]\ttrain-rmse:0.640284\tval-rmse:0.686996"
FALSE 13 "[14]\ttrain-rmse:0.491823\tval-rmse:0.687216"
FALSE 14 "[45]\ttrain-rmse:0.307773\tval-rmse:0.687521"
FALSE 15 "[27]\ttrain-rmse:0.515921\tval-rmse:0.691605"
FALSE 16 "[73]\ttrain-rmse:0.451104\tval-rmse:0.692233"
FALSE 17 "[10]\ttrain-rmse:0.653044\tval-rmse:0.695878"
FALSE 18 "[7]\ttrain-rmse:0.621689\tval-rmse:0.696761" 
FALSE 19 "[12]\ttrain-rmse:0.358257\tval-rmse:0.697600"
FALSE 20 "[37]\ttrain-rmse:0.374047\tval-rmse:0.699012"
# Train/val RMSE messages for the top 20 models (3 day window, 5/1 day met)
select(mod_summary_3d_51m, best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                   
FALSE    <chr>                                          
FALSE  1 "[63]\ttrain-rmse:0.523729\tval-rmse:0.584816" 
FALSE  2 "[91]\ttrain-rmse:0.289679\tval-rmse:0.587800" 
FALSE  3 "[10]\ttrain-rmse:0.578118\tval-rmse:0.590249" 
FALSE  4 "[48]\ttrain-rmse:0.427781\tval-rmse:0.593098" 
FALSE  5 "[161]\ttrain-rmse:0.219855\tval-rmse:0.599805"
FALSE  6 "[40]\ttrain-rmse:0.077712\tval-rmse:0.603377" 
FALSE  7 "[42]\ttrain-rmse:0.331187\tval-rmse:0.603583" 
FALSE  8 "[53]\ttrain-rmse:0.247760\tval-rmse:0.603798" 
FALSE  9 "[101]\ttrain-rmse:0.124181\tval-rmse:0.611221"
FALSE 10 "[25]\ttrain-rmse:0.478237\tval-rmse:0.612739" 
FALSE 11 "[119]\ttrain-rmse:0.084907\tval-rmse:0.615967"
FALSE 12 "[91]\ttrain-rmse:0.111615\tval-rmse:0.616458" 
FALSE 13 "[76]\ttrain-rmse:0.140558\tval-rmse:0.617523" 
FALSE 14 "[56]\ttrain-rmse:0.321637\tval-rmse:0.619190" 
FALSE 15 "[109]\ttrain-rmse:0.247061\tval-rmse:0.623077"
FALSE 16 "[19]\ttrain-rmse:0.386529\tval-rmse:0.625079" 
FALSE 17 "[38]\ttrain-rmse:0.603413\tval-rmse:0.626733" 
FALSE 18 "[56]\ttrain-rmse:0.179758\tval-rmse:0.627747" 
FALSE 19 "[45]\ttrain-rmse:0.520056\tval-rmse:0.628916" 
FALSE 20 "[106]\ttrain-rmse:0.085112\tval-rmse:0.629479"
# Train/val RMSE messages for the top 20 models (3 day window, 7/1 day met)
select(mod_summary_3d_71m, best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                   
FALSE    <chr>                                          
FALSE  1 "[120]\ttrain-rmse:0.178971\tval-rmse:0.546375"
FALSE  2 "[87]\ttrain-rmse:0.119423\tval-rmse:0.584757" 
FALSE  3 "[58]\ttrain-rmse:0.519666\tval-rmse:0.588536" 
FALSE  4 "[25]\ttrain-rmse:0.173121\tval-rmse:0.591312" 
FALSE  5 "[143]\ttrain-rmse:0.260337\tval-rmse:0.600821"
FALSE  6 "[52]\ttrain-rmse:0.428538\tval-rmse:0.604501" 
FALSE  7 "[63]\ttrain-rmse:0.015292\tval-rmse:0.604834" 
FALSE  8 "[62]\ttrain-rmse:0.239200\tval-rmse:0.613092" 
FALSE  9 "[26]\ttrain-rmse:0.169868\tval-rmse:0.615110" 
FALSE 10 "[10]\ttrain-rmse:0.493345\tval-rmse:0.616362" 
FALSE 11 "[26]\ttrain-rmse:0.487033\tval-rmse:0.616373" 
FALSE 12 "[92]\ttrain-rmse:0.483514\tval-rmse:0.616810" 
FALSE 13 "[69]\ttrain-rmse:0.090567\tval-rmse:0.617584" 
FALSE 14 "[50]\ttrain-rmse:0.386077\tval-rmse:0.618442" 
FALSE 15 "[53]\ttrain-rmse:0.541363\tval-rmse:0.618449" 
FALSE 16 "[31]\ttrain-rmse:0.405210\tval-rmse:0.618792" 
FALSE 17 "[96]\ttrain-rmse:0.149206\tval-rmse:0.622800" 
FALSE 18 "[173]\ttrain-rmse:0.205208\tval-rmse:0.624057"
FALSE 19 "[28]\ttrain-rmse:0.412852\tval-rmse:0.624704" 
FALSE 20 "[128]\ttrain-rmse:0.180527\tval-rmse:0.625379"
# Most of the lowest-loss models are overfit (train RMSE << val RMSE), so pick
# combinations further down the top-20 list whose train/val RMSE are closer.
# mod is a list-column of fitted boosters, so [[i]] extracts the booster
# directly (clearer than the equivalent mod[i][[1]]).
optimized_booster_3d_3m <- mod_summary_3d_3m$mod[[3]]
optimized_booster_3d_5m <- mod_summary_3d_5m$mod[[2]]
optimized_booster_3d_51m <- mod_summary_3d_51m$mod[[1]]
optimized_booster_3d_71m <- mod_summary_3d_71m$mod[[3]]

# Apply each selected model to the holdout validation set
preds_3 <- val_3 %>% 
  mutate(pred_secchi_3d_5m = predict(optimized_booster_3d_5m, dval_3d_5m),
         pred_secchi_3d_3m = predict(optimized_booster_3d_3m, dval_3d_3m),
         pred_secchi_3d_51m = predict(optimized_booster_3d_51m, dval_3d_51m),
         pred_secchi_3d_71m = predict(optimized_booster_3d_71m, dval_3d_71m))

# Error statistics (Metrics package) plus r^2 for each prediction column.
# .names uses the documented across() glue spec {.fn}/{.col}; the previous
# "{fn}_{col}" form predates the released spec (resulting names are identical).
evals_3 <- preds_3 %>%
  summarise(across(c(pred_secchi_3d_5m, pred_secchi_3d_3m, pred_secchi_3d_51m, pred_secchi_3d_71m),
                   list(rmse = ~rmse(secchi, .),
                        mae = ~mae(secchi, .),
                        mape = ~mape(secchi, .),
                        bias = ~bias(secchi, .),
                        p.bias = ~percent_bias(secchi, .),
                        smape = ~smape(secchi, .),
                        r2 = ~cor(secchi, .)^2), 
                   .names = "{.fn}_{.col}"))

evals_3
FALSE   rmse_pred_secchi_3d_5m mae_pred_secchi_3d_5m mape_pred_secchi_3d_5m
FALSE 1               0.817311             0.5896351              0.1730654
FALSE   bias_pred_secchi_3d_5m p.bias_pred_secchi_3d_5m smape_pred_secchi_3d_5m
FALSE 1               0.176719              0.008944264               0.1769119
FALSE   r2_pred_secchi_3d_5m rmse_pred_secchi_3d_3m mae_pred_secchi_3d_3m
FALSE 1            0.3490189              0.8109347             0.5731821
FALSE   mape_pred_secchi_3d_3m bias_pred_secchi_3d_3m p.bias_pred_secchi_3d_3m
FALSE 1              0.1657093              0.2063319               0.02247587
FALSE   smape_pred_secchi_3d_3m r2_pred_secchi_3d_3m rmse_pred_secchi_3d_51m
FALSE 1               0.1698005            0.3737727               0.7157991
FALSE   mae_pred_secchi_3d_51m mape_pred_secchi_3d_51m bias_pred_secchi_3d_51m
FALSE 1              0.5126511                0.148163               0.1459944
FALSE   p.bias_pred_secchi_3d_51m smape_pred_secchi_3d_51m r2_pred_secchi_3d_51m
FALSE 1                   0.01299                0.1521153              0.499015
FALSE   rmse_pred_secchi_3d_71m mae_pred_secchi_3d_71m mape_pred_secchi_3d_71m
FALSE 1               0.7106818                0.52801               0.1531263
FALSE   bias_pred_secchi_3d_71m p.bias_pred_secchi_3d_71m smape_pred_secchi_3d_71m
FALSE 1               0.1631748                 0.0195518                0.1572277
FALSE   r2_pred_secchi_3d_71m
FALSE 1              0.508388

Five day window dataset

# Train/val RMSE messages for the top 20 models (5 day window, 3 day met)
select(mod_summary_5d_3m, best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                  
FALSE    <chr>                                         
FALSE  1 "[11]\ttrain-rmse:0.327546\tval-rmse:0.980860"
FALSE  2 "[21]\ttrain-rmse:0.299935\tval-rmse:0.993849"
FALSE  3 "[24]\ttrain-rmse:0.334483\tval-rmse:0.996286"
FALSE  4 "[9]\ttrain-rmse:0.540934\tval-rmse:1.011602" 
FALSE  5 "[39]\ttrain-rmse:0.307835\tval-rmse:1.013431"
FALSE  6 "[31]\ttrain-rmse:0.434795\tval-rmse:1.019757"
FALSE  7 "[14]\ttrain-rmse:0.411971\tval-rmse:1.025668"
FALSE  8 "[56]\ttrain-rmse:0.310286\tval-rmse:1.039395"
FALSE  9 "[10]\ttrain-rmse:0.256525\tval-rmse:1.040997"
FALSE 10 "[10]\ttrain-rmse:0.545060\tval-rmse:1.042106"
FALSE 11 "[21]\ttrain-rmse:0.236715\tval-rmse:1.044129"
FALSE 12 "[13]\ttrain-rmse:0.571564\tval-rmse:1.046111"
FALSE 13 "[33]\ttrain-rmse:0.358365\tval-rmse:1.048972"
FALSE 14 "[54]\ttrain-rmse:0.159286\tval-rmse:1.049130"
FALSE 15 "[26]\ttrain-rmse:0.234417\tval-rmse:1.050759"
FALSE 16 "[15]\ttrain-rmse:0.401728\tval-rmse:1.050817"
FALSE 17 "[40]\ttrain-rmse:0.397549\tval-rmse:1.053423"
FALSE 18 "[38]\ttrain-rmse:0.233800\tval-rmse:1.054601"
FALSE 19 "[41]\ttrain-rmse:0.451990\tval-rmse:1.054910"
FALSE 20 "[38]\ttrain-rmse:0.481745\tval-rmse:1.056298"
# Train/val RMSE messages for the top 20 models (5 day window, 5 day met)
select(mod_summary_5d_5m, best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                  
FALSE    <chr>                                         
FALSE  1 "[19]\ttrain-rmse:0.319591\tval-rmse:0.952580"
FALSE  2 "[20]\ttrain-rmse:0.437341\tval-rmse:0.976179"
FALSE  3 "[22]\ttrain-rmse:0.284349\tval-rmse:0.993413"
FALSE  4 "[33]\ttrain-rmse:0.054067\tval-rmse:1.003331"
FALSE  5 "[35]\ttrain-rmse:0.360349\tval-rmse:1.004167"
FALSE  6 "[14]\ttrain-rmse:0.263795\tval-rmse:1.008768"
FALSE  7 "[21]\ttrain-rmse:0.403337\tval-rmse:1.008797"
FALSE  8 "[39]\ttrain-rmse:0.381127\tval-rmse:1.010368"
FALSE  9 "[12]\ttrain-rmse:0.400475\tval-rmse:1.011368"
FALSE 10 "[38]\ttrain-rmse:0.151418\tval-rmse:1.012041"
FALSE 11 "[14]\ttrain-rmse:0.429832\tval-rmse:1.012421"
FALSE 12 "[15]\ttrain-rmse:0.460666\tval-rmse:1.013479"
FALSE 13 "[56]\ttrain-rmse:0.388745\tval-rmse:1.013859"
FALSE 14 "[52]\ttrain-rmse:0.316780\tval-rmse:1.014898"
FALSE 15 "[18]\ttrain-rmse:0.302212\tval-rmse:1.016566"
FALSE 16 "[11]\ttrain-rmse:0.258037\tval-rmse:1.018300"
FALSE 17 "[69]\ttrain-rmse:0.337460\tval-rmse:1.019363"
FALSE 18 "[64]\ttrain-rmse:0.382138\tval-rmse:1.021017"
FALSE 19 "[39]\ttrain-rmse:0.063766\tval-rmse:1.021045"
FALSE 20 "[68]\ttrain-rmse:0.358500\tval-rmse:1.021180"
# Train/val RMSE messages for the top 20 models (5 day window, 5/1 day met)
select(mod_summary_5d_51m, best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                  
FALSE    <chr>                                         
FALSE  1 "[37]\ttrain-rmse:0.170404\tval-rmse:0.939598"
FALSE  2 "[16]\ttrain-rmse:0.227196\tval-rmse:0.977729"
FALSE  3 "[14]\ttrain-rmse:0.476502\tval-rmse:0.977982"
FALSE  4 "[18]\ttrain-rmse:0.246417\tval-rmse:0.979007"
FALSE  5 "[36]\ttrain-rmse:0.245908\tval-rmse:0.981467"
FALSE  6 "[18]\ttrain-rmse:0.408781\tval-rmse:0.992688"
FALSE  7 "[46]\ttrain-rmse:0.152733\tval-rmse:0.994134"
FALSE  8 "[10]\ttrain-rmse:0.323117\tval-rmse:0.995858"
FALSE  9 "[19]\ttrain-rmse:0.355387\tval-rmse:0.996048"
FALSE 10 "[25]\ttrain-rmse:0.233858\tval-rmse:1.007492"
FALSE 11 "[52]\ttrain-rmse:0.377163\tval-rmse:1.010191"
FALSE 12 "[68]\ttrain-rmse:0.111663\tval-rmse:1.015421"
FALSE 13 "[21]\ttrain-rmse:0.383587\tval-rmse:1.016748"
FALSE 14 "[54]\ttrain-rmse:0.267656\tval-rmse:1.016777"
FALSE 15 "[66]\ttrain-rmse:0.250886\tval-rmse:1.019857"
FALSE 16 "[23]\ttrain-rmse:0.369208\tval-rmse:1.024546"
FALSE 17 "[10]\ttrain-rmse:0.504929\tval-rmse:1.025554"
FALSE 18 "[15]\ttrain-rmse:0.190793\tval-rmse:1.026910"
FALSE 19 "[11]\ttrain-rmse:0.357269\tval-rmse:1.029359"
FALSE 20 "[46]\ttrain-rmse:0.262580\tval-rmse:1.031247"
# Train/val RMSE messages for the top 20 models (5 day window, 7/1 day met)
select(mod_summary_5d_71m, best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                  
FALSE    <chr>                                         
FALSE  1 "[12]\ttrain-rmse:0.418125\tval-rmse:0.959395"
FALSE  2 "[21]\ttrain-rmse:0.195162\tval-rmse:1.005950"
FALSE  3 "[22]\ttrain-rmse:0.307149\tval-rmse:1.009616"
FALSE  4 "[35]\ttrain-rmse:0.296614\tval-rmse:1.012367"
FALSE  5 "[10]\ttrain-rmse:0.439556\tval-rmse:1.022235"
FALSE  6 "[18]\ttrain-rmse:0.358194\tval-rmse:1.036492"
FALSE  7 "[37]\ttrain-rmse:0.217485\tval-rmse:1.053069"
FALSE  8 "[14]\ttrain-rmse:0.400128\tval-rmse:1.057614"
FALSE  9 "[73]\ttrain-rmse:0.226827\tval-rmse:1.057913"
FALSE 10 "[39]\ttrain-rmse:0.424131\tval-rmse:1.057921"
FALSE 11 "[9]\ttrain-rmse:0.257221\tval-rmse:1.060352" 
FALSE 12 "[24]\ttrain-rmse:0.322158\tval-rmse:1.069728"
FALSE 13 "[9]\ttrain-rmse:0.485880\tval-rmse:1.071495" 
FALSE 14 "[29]\ttrain-rmse:0.128647\tval-rmse:1.071771"
FALSE 15 "[28]\ttrain-rmse:0.182173\tval-rmse:1.073599"
FALSE 16 "[31]\ttrain-rmse:0.448011\tval-rmse:1.074643"
FALSE 17 "[14]\ttrain-rmse:0.261694\tval-rmse:1.077375"
FALSE 18 "[40]\ttrain-rmse:0.401865\tval-rmse:1.077975"
FALSE 19 "[38]\ttrain-rmse:0.332829\tval-rmse:1.077997"
FALSE 20 "[36]\ttrain-rmse:0.460572\tval-rmse:1.078719"
# Most of the top-ranked models are overfit (train RMSE well below val RMSE),
# so pick candidates where train and validation RMSE sit closer together.
optimized_booster_5d_3m <- mod_summary_5d_3m$mod[[4]]   # still quite overfit: train ~0.5 vs val ~1
optimized_booster_5d_5m <- mod_summary_5d_5m$mod[[2]]   # every option here is fairly overfit
optimized_booster_5d_51m <- mod_summary_5d_51m$mod[[3]] # overfit as well
optimized_booster_5d_71m <- mod_summary_5d_71m$mod[[1]] # likewise overfit

# Predict Secchi on the holdout validation set with each selected booster
preds_5 <- val_5 %>%
  mutate(pred_secchi_5d_5m = predict(optimized_booster_5d_5m, dval_5d_5m),
         pred_secchi_5d_3m = predict(optimized_booster_5d_3m, dval_5d_3m),
         pred_secchi_5d_71m = predict(optimized_booster_5d_71m, dval_5d_71m),
         pred_secchi_5d_51m = predict(optimized_booster_5d_51m, dval_5d_51m))

# Validation metrics for each 5-day model variant. Metrics:: functions take
# (actual, predicted); r2 is the squared Pearson correlation between observed
# and predicted Secchi.
evals_5 <- preds_5 %>%
  summarise(across(c(pred_secchi_5d_5m, pred_secchi_5d_3m, pred_secchi_5d_71m, pred_secchi_5d_51m),
                   list(rmse = ~rmse(secchi, .),
                        mae = ~mae(secchi, .),
                        mape = ~mape(secchi, .),
                        bias = ~bias(secchi, .),
                        p.bias = ~percent_bias(secchi, .),
                        smape = ~smape(secchi, .),
                        r2 = ~cor(secchi, .)^2),
                   # {.fn}/{.col} are the documented across() glue tokens;
                   # bare {fn}/{col} are a deprecated dplyr 1.0.0 fallback.
                   # Output column names are unchanged.
                   .names = "{.fn}_{.col}"))

evals_5
FALSE   rmse_pred_secchi_5d_5m mae_pred_secchi_5d_5m mape_pred_secchi_5d_5m
FALSE 1              0.8916389             0.7046072              0.1942366
FALSE   bias_pred_secchi_5d_5m p.bias_pred_secchi_5d_5m smape_pred_secchi_5d_5m
FALSE 1              0.2590617               0.03325661               0.2014614
FALSE   r2_pred_secchi_5d_5m rmse_pred_secchi_5d_3m mae_pred_secchi_5d_3m
FALSE 1            0.4403616               1.038295             0.8179092
FALSE   mape_pred_secchi_5d_3m bias_pred_secchi_5d_3m p.bias_pred_secchi_5d_3m
FALSE 1              0.2282917              0.3925272               0.06173814
FALSE   smape_pred_secchi_5d_3m r2_pred_secchi_5d_3m rmse_pred_secchi_5d_71m
FALSE 1               0.2424634            0.3195622               0.9297341
FALSE   mae_pred_secchi_5d_71m mape_pred_secchi_5d_71m bias_pred_secchi_5d_71m
FALSE 1              0.7264634               0.2040276               0.3166904
FALSE   p.bias_pred_secchi_5d_71m smape_pred_secchi_5d_71m r2_pred_secchi_5d_71m
FALSE 1                0.05325865                0.2155079             0.4374517
FALSE   rmse_pred_secchi_5d_51m mae_pred_secchi_5d_51m mape_pred_secchi_5d_51m
FALSE 1               0.9191537              0.7264615               0.2020312
FALSE   bias_pred_secchi_5d_51m p.bias_pred_secchi_5d_51m smape_pred_secchi_5d_51m
FALSE 1               0.2477498                0.03405154                0.2099985
FALSE   r2_pred_secchi_5d_51m
FALSE 1             0.4315663

Jemma-special window dataset

mod_summary_jd_3m %>% select(best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                  
FALSE    <chr>                                         
FALSE  1 "[5]\ttrain-rmse:1.053405\tval-rmse:0.732145" 
FALSE  2 "[14]\ttrain-rmse:0.601904\tval-rmse:0.734536"
FALSE  3 "[21]\ttrain-rmse:0.649525\tval-rmse:0.735325"
FALSE  4 "[22]\ttrain-rmse:0.758808\tval-rmse:0.739086"
FALSE  5 "[15]\ttrain-rmse:0.529075\tval-rmse:0.740712"
FALSE  6 "[21]\ttrain-rmse:0.679336\tval-rmse:0.741152"
FALSE  7 "[22]\ttrain-rmse:0.616632\tval-rmse:0.741287"
FALSE  8 "[19]\ttrain-rmse:0.752240\tval-rmse:0.743072"
FALSE  9 "[21]\ttrain-rmse:0.781823\tval-rmse:0.745712"
FALSE 10 "[38]\ttrain-rmse:0.508722\tval-rmse:0.746619"
FALSE 11 "[24]\ttrain-rmse:0.618787\tval-rmse:0.746728"
FALSE 12 "[25]\ttrain-rmse:0.487934\tval-rmse:0.748918"
FALSE 13 "[21]\ttrain-rmse:0.685765\tval-rmse:0.750516"
FALSE 14 "[22]\ttrain-rmse:0.651607\tval-rmse:0.750522"
FALSE 15 "[23]\ttrain-rmse:0.574940\tval-rmse:0.751599"
FALSE 16 "[9]\ttrain-rmse:0.697727\tval-rmse:0.752579" 
FALSE 17 "[23]\ttrain-rmse:0.570996\tval-rmse:0.752864"
FALSE 18 "[8]\ttrain-rmse:0.665623\tval-rmse:0.753821" 
FALSE 19 "[21]\ttrain-rmse:0.685295\tval-rmse:0.754233"
FALSE 20 "[23]\ttrain-rmse:0.627481\tval-rmse:0.756205"
mod_summary_jd_5m %>% select(best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                  
FALSE    <chr>                                         
FALSE  1 "[24]\ttrain-rmse:0.589641\tval-rmse:0.775547"
FALSE  2 "[7]\ttrain-rmse:0.641924\tval-rmse:0.791019" 
FALSE  3 "[5]\ttrain-rmse:0.875744\tval-rmse:0.795134" 
FALSE  4 "[8]\ttrain-rmse:0.579254\tval-rmse:0.798143" 
FALSE  5 "[6]\ttrain-rmse:0.688624\tval-rmse:0.802263" 
FALSE  6 "[22]\ttrain-rmse:0.753772\tval-rmse:0.802359"
FALSE  7 "[20]\ttrain-rmse:0.676896\tval-rmse:0.807530"
FALSE  8 "[7]\ttrain-rmse:0.813274\tval-rmse:0.810102" 
FALSE  9 "[22]\ttrain-rmse:0.753550\tval-rmse:0.811635"
FALSE 10 "[21]\ttrain-rmse:0.727240\tval-rmse:0.813047"
FALSE 11 "[21]\ttrain-rmse:0.842255\tval-rmse:0.813709"
FALSE 12 "[19]\ttrain-rmse:0.913022\tval-rmse:0.814985"
FALSE 13 "[22]\ttrain-rmse:0.698649\tval-rmse:0.816389"
FALSE 14 "[22]\ttrain-rmse:0.746893\tval-rmse:0.818785"
FALSE 15 "[25]\ttrain-rmse:0.730175\tval-rmse:0.820589"
FALSE 16 "[22]\ttrain-rmse:0.598218\tval-rmse:0.821636"
FALSE 17 "[19]\ttrain-rmse:0.776063\tval-rmse:0.822102"
FALSE 18 "[7]\ttrain-rmse:0.795072\tval-rmse:0.822194" 
FALSE 19 "[23]\ttrain-rmse:0.671616\tval-rmse:0.823878"
FALSE 20 "[23]\ttrain-rmse:0.754198\tval-rmse:0.824009"
mod_summary_jd_51m %>% select(best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                  
FALSE    <chr>                                         
FALSE  1 "[11]\ttrain-rmse:0.563328\tval-rmse:0.719620"
FALSE  2 "[20]\ttrain-rmse:0.640292\tval-rmse:0.732055"
FALSE  3 "[9]\ttrain-rmse:0.704207\tval-rmse:0.739378" 
FALSE  4 "[7]\ttrain-rmse:0.578080\tval-rmse:0.748831" 
FALSE  5 "[31]\ttrain-rmse:0.610108\tval-rmse:0.757068"
FALSE  6 "[8]\ttrain-rmse:0.594283\tval-rmse:0.765976" 
FALSE  7 "[21]\ttrain-rmse:0.594361\tval-rmse:0.771174"
FALSE  8 "[8]\ttrain-rmse:0.488876\tval-rmse:0.777529" 
FALSE  9 "[6]\ttrain-rmse:0.695668\tval-rmse:0.777999" 
FALSE 10 "[6]\ttrain-rmse:0.641776\tval-rmse:0.783015" 
FALSE 11 "[8]\ttrain-rmse:0.723066\tval-rmse:0.783788" 
FALSE 12 "[31]\ttrain-rmse:0.592202\tval-rmse:0.787315"
FALSE 13 "[22]\ttrain-rmse:0.763185\tval-rmse:0.792019"
FALSE 14 "[25]\ttrain-rmse:0.273874\tval-rmse:0.797882"
FALSE 15 "[19]\ttrain-rmse:0.674177\tval-rmse:0.798889"
FALSE 16 "[27]\ttrain-rmse:0.737569\tval-rmse:0.799222"
FALSE 17 "[25]\ttrain-rmse:0.518851\tval-rmse:0.799871"
FALSE 18 "[7]\ttrain-rmse:0.538728\tval-rmse:0.801856" 
FALSE 19 "[19]\ttrain-rmse:0.672952\tval-rmse:0.804626"
FALSE 20 "[25]\ttrain-rmse:0.714120\tval-rmse:0.806391"
mod_summary_jd_71m %>% select(best_message)
FALSE # A tibble: 20 × 1
FALSE    best_message                                  
FALSE    <chr>                                         
FALSE  1 "[25]\ttrain-rmse:0.706391\tval-rmse:0.688101"
FALSE  2 "[22]\ttrain-rmse:0.621843\tval-rmse:0.711899"
FALSE  3 "[24]\ttrain-rmse:0.541285\tval-rmse:0.722433"
FALSE  4 "[8]\ttrain-rmse:0.622469\tval-rmse:0.723832" 
FALSE  5 "[22]\ttrain-rmse:0.582793\tval-rmse:0.732713"
FALSE  6 "[37]\ttrain-rmse:0.494461\tval-rmse:0.733796"
FALSE  7 "[22]\ttrain-rmse:0.577833\tval-rmse:0.736067"
FALSE  8 "[22]\ttrain-rmse:0.589941\tval-rmse:0.739976"
FALSE  9 "[25]\ttrain-rmse:0.528916\tval-rmse:0.740118"
FALSE 10 "[7]\ttrain-rmse:0.598981\tval-rmse:0.748153" 
FALSE 11 "[31]\ttrain-rmse:0.353413\tval-rmse:0.749748"
FALSE 12 "[6]\ttrain-rmse:0.671189\tval-rmse:0.749940" 
FALSE 13 "[27]\ttrain-rmse:0.505670\tval-rmse:0.751821"
FALSE 14 "[21]\ttrain-rmse:0.600679\tval-rmse:0.752852"
FALSE 15 "[26]\ttrain-rmse:0.450615\tval-rmse:0.754393"
FALSE 16 "[24]\ttrain-rmse:0.554773\tval-rmse:0.756088"
FALSE 17 "[20]\ttrain-rmse:0.636889\tval-rmse:0.758873"
FALSE 18 "[22]\ttrain-rmse:0.590224\tval-rmse:0.759143"
FALSE 19 "[21]\ttrain-rmse:0.633044\tval-rmse:0.760393"
FALSE 20 "[23]\ttrain-rmse:0.591821\tval-rmse:0.760800"
# For the Jemma window the rank-1 model in every summary already shows train
# and validation RMSE in reasonable agreement, so take the best model directly.
optimized_booster_jd_3m <- mod_summary_jd_3m$mod[[1]]
optimized_booster_jd_5m <- mod_summary_jd_5m$mod[[1]]
optimized_booster_jd_51m <- mod_summary_jd_51m$mod[[1]]
optimized_booster_jd_71m <- mod_summary_jd_71m$mod[[1]]

# Predict Secchi on the holdout validation set with each selected booster
preds_jd <- val_j %>%
  mutate(pred_secchi_jd_5m = predict(optimized_booster_jd_5m, dval_jd_5m),
         pred_secchi_jd_3m = predict(optimized_booster_jd_3m, dval_jd_3m),
         pred_secchi_jd_51m = predict(optimized_booster_jd_51m, dval_jd_51m),
         pred_secchi_jd_71m = predict(optimized_booster_jd_71m, dval_jd_71m))

# Validation metrics for each Jemma-window model variant. Metrics:: functions
# take (actual, predicted); r2 is the squared Pearson correlation between
# observed and predicted Secchi.
evals_jd <- preds_jd %>%
  summarise(across(c(pred_secchi_jd_5m, pred_secchi_jd_3m, pred_secchi_jd_71m, pred_secchi_jd_51m),
                   list(rmse = ~rmse(secchi, .),
                        mae = ~mae(secchi, .),
                        mape = ~mape(secchi, .),
                        bias = ~bias(secchi, .),
                        p.bias = ~percent_bias(secchi, .),
                        smape = ~smape(secchi, .),
                        r2 = ~cor(secchi, .)^2),
                   # {.fn}/{.col} are the documented across() glue tokens;
                   # bare {fn}/{col} are a deprecated dplyr 1.0.0 fallback.
                   # Output column names are unchanged.
                   .names = "{.fn}_{.col}"))

evals_jd
FALSE   rmse_pred_secchi_jd_5m mae_pred_secchi_jd_5m mape_pred_secchi_jd_5m
FALSE 1              0.7238251             0.6132381              0.1980581
FALSE   bias_pred_secchi_jd_5m p.bias_pred_secchi_jd_5m smape_pred_secchi_jd_5m
FALSE 1              0.1221853               -0.0223849               0.1909397
FALSE   r2_pred_secchi_jd_5m rmse_pred_secchi_jd_3m mae_pred_secchi_jd_3m
FALSE 1            0.6242054               0.904584             0.7361402
FALSE   mape_pred_secchi_jd_3m bias_pred_secchi_jd_3m p.bias_pred_secchi_jd_3m
FALSE 1              0.2167459              0.3849171               0.05141815
FALSE   smape_pred_secchi_jd_3m r2_pred_secchi_jd_3m rmse_pred_secchi_jd_71m
FALSE 1               0.2253142             0.506181               0.7583794
FALSE   mae_pred_secchi_jd_71m mape_pred_secchi_jd_71m bias_pred_secchi_jd_71m
FALSE 1               0.634782               0.2111927               0.1133909
FALSE   p.bias_pred_secchi_jd_71m smape_pred_secchi_jd_71m r2_pred_secchi_jd_71m
FALSE 1               -0.03120633                0.2017568             0.5747047
FALSE   rmse_pred_secchi_jd_51m mae_pred_secchi_jd_51m mape_pred_secchi_jd_51m
FALSE 1               0.8313475               0.694705               0.2483355
FALSE   bias_pred_secchi_jd_51m p.bias_pred_secchi_jd_51m smape_pred_secchi_jd_51m
FALSE 1              -0.1823586                -0.1207984                0.2188396
FALSE   r2_pred_secchi_jd_51m
FALSE 1             0.4852026

Model Performance - 3 day window

Model Performance - 5 day window

Keep in mind that all of these models seemed overfit in the train/test sets.

Considering that the train/test metrics suggested overfitting, the validation results look pretty good here.

Model Performance - Jemma window

Applying model to full data

By looking at the train/test results, the validation results (especially the model performance at higher Secchi estimates), and the overall r^2 of the validation, it looks like the 7/1 day window with the 5/1 day met data is the best performing. This is a relatively subjective decision - and the end user can absolutely make a different decision.

# Chosen configuration: the Jemma (7/1 day) matchup window with 5/1 day met
# summaries performed best on the validation holdout (a subjective call).
features <- band_met51_feats
model <- optimized_booster_jd_51m
met <- '5/1 day met summaries'
window <- '7/1 day window'

# Load the full remote-sensing stack. secchi gets a dummy value (100) purely
# so prepData()'s !is.na(secchi) filter keeps every row.
full_stack <- read_csv('data/upstreamRS/yojoa_corr_rrs_met_scaled_v2023-06-15.csv') %>%
  mutate(secchi = 100) %>%
  prepData()

# Feature matrix for predicting over the entire stack
stack_xgb <- xgb.DMatrix(data = as.matrix(full_stack[, features]))

# Replace the dummy secchi with model estimates; keep only identifying columns
full_stack_simp <- full_stack %>%
  mutate(secchi = predict(model, stack_xgb)) %>%
  select(date, location, secchi, mission)

# Combine the in-situ record (tagged 'Measured') with the Landsat estimates;
# strip spaces from location names so site ids match across the two sources.
situ_stack <- read_csv('data/in-situ/Secchi_completedataset.csv') %>%
  mutate(secchi = as.numeric(secchi),
         date = mdy(date)) %>%
  filter(!is.na(secchi)) %>%
  mutate(mission = 'Measured') %>%
  bind_rows(full_stack_simp) %>%
  mutate(location = gsub(' ', '', location))

Let’s look at each of the site records alongside the Landsat-estimated Secchi depth.

FALSE [[1]]

FALSE 
FALSE [[2]]

FALSE 
FALSE [[3]]

FALSE 
FALSE [[4]]

FALSE 
FALSE [[5]]

FALSE 
FALSE [[6]]

FALSE 
FALSE [[7]]

FALSE 
FALSE [[8]]

FALSE 
FALSE [[9]]

FALSE 
FALSE [[10]]

FALSE 
FALSE [[11]]

FALSE 
FALSE [[12]]

FALSE 
FALSE [[13]]

FALSE 
FALSE [[14]]

FALSE 
FALSE [[15]]

FALSE 
FALSE [[16]]

FALSE 
FALSE [[17]]

FALSE 
FALSE [[18]]

Look at recent data per location

# Plot the 2018+ record of measured and Landsat-estimated Secchi for one site.
#
# site: a single location id present in situ_stack$location.
# Returns a ggplot object: one point per observation, colored/shaped by data
# source, with the Landsat estimates drawn as open circles (shape 1).
plotRecentBySite = function(site) {
  # Hoist the axis bounds out of the plot spec (they were recomputed three
  # times) and add na.rm so a stray NA can't propagate into the limits and
  # error the plot.
  max_secchi = max(situ_stack$secchi, na.rm = TRUE)
  max_date = max(situ_stack$date, na.rm = TRUE)
  ggplot(situ_stack %>%
           filter(location == site), aes(x = date, y = secchi, color = mission,
                                        shape = mission)) + 
    geom_point() + 
    labs(title = paste0('Yojoa Secchi 2018-2022 - Site ', site),
         subtitle = paste0(window, ', ', met, ', moderate data stringency'),
         y = 'Secchi (m)',
         color = 'data source', shape = 'data source') +
    scale_color_manual(values = c('grey10','grey30','grey50','grey70','blue')) + 
    theme_few() +
    scale_shape_manual(values = c(19,19,19,19,1)) +
    scale_y_continuous(limits = c(0, max_secchi), breaks = seq(0, max_secchi, 2)) +
    scale_x_date(limits = c(ymd('2018-01-01'), max_date)) +
    # single theme() call (previously split across two)
    theme(legend.position = c(0.8,0.8),
          plot.title = element_text(hjust = 0.5, face = 'bold'),
          plot.subtitle = element_text(hjust = 0.5))
}

# One recent-record plot per site, in alphabetical site order
situ_stack$location %>% unique() %>% sort() %>% map(plotRecentBySite)
FALSE [[1]]

FALSE 
FALSE [[2]]

FALSE 
FALSE [[3]]

FALSE 
FALSE [[4]]

FALSE 
FALSE [[5]]

FALSE 
FALSE [[6]]

FALSE 
FALSE [[7]]

FALSE 
FALSE [[8]]

FALSE 
FALSE [[9]]

FALSE 
FALSE [[10]]

FALSE 
FALSE [[11]]

FALSE 
FALSE [[12]]

FALSE 
FALSE [[13]]

FALSE 
FALSE [[14]]

FALSE 
FALSE [[15]]

FALSE 
FALSE [[16]]

FALSE 
FALSE [[17]]

FALSE 
FALSE [[18]]

Whole lake secchi dynamics

While there is plenty of variability across the lake, let’s summarize to a single value per date, since not all sites have the same density of record. Since there are a few oddballs in here (both in terms of measured and estimated), we’ll use the median Secchi across all sites.

# Whole-lake summary: the median of every numeric column per date and data
# source. na.rm guards against stray NAs turning a day's median into NA, and
# .groups = 'drop' returns an ungrouped tibble (and silences the regrouping
# message) instead of leaving the result grouped by date.
lake_med <- situ_stack %>%
  group_by(date, mission) %>%
  summarize(across(where(is.numeric), ~median(.x, na.rm = TRUE)),
            .groups = 'drop')

2006

Recent

# Whole-lake median Secchi, 2018-2022: measured vs Landsat-estimated
ggplot(lake_med, aes(x = date, y = secchi, color = mission, shape = mission)) +
  geom_point() +
  labs(title = 'Yojoa Secchi 2018-2022\nwhole-lake median',
       subtitle = paste0(window, ', ', met, ', moderate data stringency'),
       y = 'median Secchi (m)',
       color = 'data source', shape = 'data source') +
  scale_color_manual(values = c('grey10','grey30','grey50','grey70','blue')) +
  scale_shape_manual(values = c(19,19,19,19,1)) +
  scale_x_date(limits = c(as.Date('2018-01-01'), as.Date('2023-01-01'))) +
  theme_few() +
  theme(legend.position = c(0.8,0.8),
        plot.title = element_text(hjust = 0.5, face = 'bold'),
        plot.subtitle = element_text(hjust = 0.5))